Expose tmem "freeable" memory for use by management tools.
Management tools looking for a machine with available
memory often look at free_memory to determine if there
is enough physical memory to house a new or migrating
guest. Since tmem absorbs much or all free memory,
and since "ephemeral" tmem memory can be synchronously
freed, management tools need more data -- not only how
much memory is "free" but also how much memory is
"freeable" by tmem if tmem is told (via an already
existing tmem hypercall) to relinquish freeable memory.
This patch provides that extra piece of data (in MB).
Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
return Py_BuildValue("s", buffer);
case TMEMC_FLUSH:
return Py_BuildValue("i", rc);
+ case TMEMC_QUERY_FREEABLE_MB:
+ return Py_BuildValue("i", rc);
case TMEMC_THAW:
case TMEMC_FREEZE:
case TMEMC_DESTROY:
('tmem_set_weight', None),
('tmem_set_cap', None),
('tmem_set_compress', None),
+ ('tmem_query_freeable_mb', None),
('tmem_shared_auth', None)]
host_funcs = [('get_by_name_label', None),
return xen_api_error(e)
return xen_api_success_void()
+ def host_tmem_query_freeable_mb(self, _, host_ref):
+ # Xen-API wrapper: report how many MB of tmem memory the hypervisor
+ # could synchronously free on this host.
+ node = XendNode.instance()
+ try:
+ pages = node.tmem_query_freeable_mb()
+ except Exception, e:
+ return xen_api_error(e)
+ # Map a None result to -1 so the API always returns an integer.
+ # (The and/or idiom is safe here: a 0 result still yields 0.)
+ return xen_api_success(pages is None and -1 or pages)
+
def host_tmem_shared_auth(self, _, host_ref, cli_id, uuid_str, auth):
node = XendNode.instance()
try:
TMEMC_SET_WEIGHT = 5
TMEMC_SET_CAP = 6
TMEMC_SET_COMPRESS = 7
+TMEMC_QUERY_FREEABLE_MB = 8
buf = ''
return self.xc.tmem_control(pool_id, subop, cli_id, arg1, arg2, arg3, buf)
+ def tmem_query_freeable_mb(self):
+ # Issue the TMEMC_QUERY_FREEABLE_MB tmem-control subop to ask the
+ # hypervisor how many MB of tmem memory are freeable.
+ # pool_id/cli_id of -1 mean "across all pools / all clients";
+ # the subop takes no arguments, so arg1-3 and buf are unused.
+ pool_id = -1
+ cli_id = -1
+ subop = TMEMC_QUERY_FREEABLE_MB
+ arg1 = 0
+ arg2 = 0
+ arg3 = 0
+ buf = ''
+ return self.xc.tmem_control(pool_id, subop, cli_id, arg1, arg2, arg3, buf)
+
def tmem_shared_auth(self, cli_id, uuid_str, auth):
return self.xc.tmem_auth(cli_id, uuid_str, auth)
'tmem_list', 'tmem_freeze', 'tmem_thaw',
'tmem_flush', 'tmem_destroy', 'tmem_set_weight',
'tmem_set_cap', 'tmem_set_compress',
- 'tmem_shared_auth'],
+ 'tmem_query_freeable_mb', 'tmem_shared_auth'],
'node'),
(XendDmesg, ['info', 'clear'], 'node.dmesg')]:
inst = type.instance()
'tmem-set' : ('[<Domain>|-a|--all] [weight=<weight>] [cap=<cap>] '
'[compress=<compress>]',
'Change tmem settings.'),
+ 'tmem-freeable' : ('', 'Print number of freeable tmem pages.'),
'tmem-shared-auth' : ('[<Domain>|-a|--all] [--uuid=<uuid>] [--auth=<0|1>]', 'De/authenticate shared tmem pool.'),
# security
if compress is not None:
server.xend.node.tmem_set_compress(domid, compress)
+def xm_tmem_freeable_mb(args):
+ # 'xm tmem-freeable' handler: print the amount of freeable tmem
+ # memory, routed via Xen-API or legacy xend depending on server type.
+ if serverType == SERVER_XEN_API:
+ print server.xenapi.host.tmem_query_freeable_mb()
+ else:
+ print server.xend.node.tmem_query_freeable_mb()
+
def xm_tmem_shared_auth(args):
try:
(options, params) = getopt.gnu_getopt(args, 'au:A:', ['all','uuid=','auth='])
"tmem-destroy": xm_tmem_destroy,
"tmem-list": xm_tmem_list,
"tmem-set": xm_tmem_set,
+ "tmem-freeable": xm_tmem_freeable_mb,
"tmem-shared-auth": xm_tmem_shared_auth,
}
pool_t *pool;
int i;
- if ( (pool = tmem_malloc(pool_t,NULL)) == NULL )
+ if ( (pool = tmh_alloc_infra(sizeof(pool_t),__alignof__(pool_t))) == NULL )
return NULL;
for (i = 0; i < OBJ_HASH_BUCKETS; i++)
pool->obj_rb_root[i] = RB_ROOT;
INVERT_SENTINEL(pool,POOL);
pool->client = NULL;
list_del(&pool->pool_list);
- tmem_free(pool,sizeof(pool_t),NULL);
+ tmh_free_infra(pool);
}
/* register new_client as a user of this shared pool and return new
static client_t *client_create(cli_id_t cli_id)
{
- client_t *client = tmem_malloc(client_t,NULL);
+ client_t *client = tmh_alloc_infra(sizeof(client_t),__alignof__(client_t));
int i;
printk("tmem: initializing tmem capability for %s=%d...",cli_id_str,cli_id);
{
printk("failed... can't allocate host-dependent part of client\n");
if ( client )
- tmem_free(client,sizeof(client_t),NULL);
+ tmh_free_infra(client);
return NULL;
}
tmh_set_client_from_id(client,cli_id);
case TMEMC_SET_COMPRESS:
ret = tmemc_set_var(op->u.ctrl.cli_id,subop,op->u.ctrl.arg1);
break;
+ case TMEMC_QUERY_FREEABLE_MB:
+ ret = tmh_freeable_mb();
+ break;
case TMEMC_SAVE_BEGIN:
case TMEMC_RESTORE_BEGIN:
case TMEMC_SAVE_GET_VERSION:
EXPORT int opt_tmem_lock = 0;
integer_param("tmem_lock", opt_tmem_lock);
+EXPORT atomic_t freeable_page_count = ATOMIC_INIT(0);
+
#ifdef COMPARE_COPY_PAGE_SSE2
DECL_CYC_COUNTER(pg_copy1);
DECL_CYC_COUNTER(pg_copy2);
#define TMEMC_SET_WEIGHT 5
#define TMEMC_SET_CAP 6
#define TMEMC_SET_COMPRESS 7
-#define TMEMC_SHARED_POOL_AUTH 8
-#define TMEMC_SHARED_POOL_DEAUTH 9
+#define TMEMC_QUERY_FREEABLE_MB 8
#define TMEMC_SAVE_BEGIN 10
#define TMEMC_SAVE_GET_VERSION 11
#define TMEMC_SAVE_GET_MAXPOOLS 12
extern struct page_list_head tmh_page_list;
extern spinlock_t tmh_page_list_lock;
extern unsigned long tmh_page_list_pages;
+extern atomic_t freeable_page_count;
extern spinlock_t tmem_lock;
extern spinlock_t tmem_spinlock;
}
/*
- * Ephemeral memory allocation for persistent data
+ * Memory allocation for persistent data
*/
static inline bool_t domain_fully_allocated(struct domain *d)
if ( pi == NULL && !no_heap )
pi = alloc_domheap_pages(0,0,MEMF_tmem);
ASSERT((pi == NULL) || IS_VALID_PAGE(pi));
+ if ( pi != NULL )
+ atomic_inc(&freeable_page_count);
return pi;
}
{
ASSERT(IS_VALID_PAGE(pi));
tmh_page_list_put(pi);
+ atomic_dec(&freeable_page_count);
}
static inline unsigned int tmem_subpage_maxsize(void)
return tmh_mempool_maxalloc;
}
+static inline unsigned long tmh_freeable_mb(void)
+{
+ /* Freeable memory in MB: pages held on tmem's own page list plus
+ * pages currently absorbed as freeable (ephemeral) tmem memory,
+ * converted from a page count to MB via the (20 - PAGE_SHIFT) shift. */
+ return (tmh_avail_pages() + _atomic_read(freeable_page_count)) >>
+ (20 - PAGE_SHIFT);
+}
+
+
+/*
+ * Memory allocation for "infrastructure" data
+ */
+
+static inline void *tmh_alloc_infra(size_t size, size_t align)
+{
+ /* Allocate long-lived "infrastructure" metadata (e.g. pool_t,
+ * client_t) from the general Xen heap with the requested alignment,
+ * rather than from tmem's reclaimable page pools. Returns NULL on
+ * failure; pair with tmh_free_infra(). */
+ return _xmalloc(size,align);
+}
+
+static inline void tmh_free_infra(void *p)
+{
+ /* Free memory obtained from tmh_alloc_infra(). xfree() returns void,
+ * so a 'return xfree(p);' here would return a void expression from a
+ * void function -- a C99 6.8.6.4 constraint violation (rejected by
+ * gcc -pedantic). Call it as a plain statement instead. */
+ xfree(p);
+}
+
#define tmh_lock_all opt_tmem_lock
#define tmh_flush_dups opt_tmem_flush_dups
#define tmh_called_from_tmem(_memflags) (_memflags & MEMF_tmem)